import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# prompt: read 100 csv files with pandas in order from 1 to 100 and with the format {number}{u}.csv. Also there are two types of .csv files: u and v. I want to stack, for example, 1u and 1v and then stack the 100 files into a new dimension
arys = []
for i in range(1, 101):
    filename_u = f"OceanFlow/{i}u.csv"
    filename_v = f"OceanFlow/{i}v.csv"
    u = pd.read_csv(filename_u, header=None).to_numpy().T
    v = pd.read_csv(filename_v, header=None).to_numpy().T
    # Stack the u and v components, then move them to a trailing channel axis.
    arys.append(np.transpose(np.array([u, v], dtype=np.float32), [1, 2, 0]))
data = np.array(arys)  # shape: (100, ny, nx, 2)
data.shape
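The next cell refers to a variance_in_time matrix computed in a cell not shown here. A minimal sketch of one plausible definition, assuming it is the per-cell variance of flow speed over the 100 time steps:

speed = np.sqrt(data[..., 0] ** 2 + data[..., 1] ** 2)  # flow speed, shape (100, ny, nx)
variance_in_time = speed.var(axis=0)                    # variance over time, shape (ny, nx)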
# prompt: find the indices of the smallest variance on the variance_in_time matrix but bigger than zero
# Get the indices of the smallest variance values that are greater than zero.
smallest_variance_indices = np.where(
    np.logical_and(
        variance_in_time > 0,
        variance_in_time == np.min(variance_in_time[variance_in_time > 0]),
    )
)
# Print the indices; the factor of 3 apparently converts grid indices to
# kilometres (the OceanFlow grid points are 3 km apart).
print(smallest_variance_indices * np.array([3]))
[[510]
[363]]
Maximum x-axis velocity
# prompt: I can get the maximum value for the x-axis with this code: new_dim[:, 0, :, :].max(). But how can I know in which of the 100 times it occurs, and at which indices?
max_value = data[:, :, :, 0].max()
max_indices = np.where(data[:, :, :, 0] == max_value)
print("Maximum x-axis velocity:", max_value)
# The factor of 3 apparently converts the raw indices to physical units
# (3-hour time steps and a 3 km grid spacing).
print("Time index:", max_indices[0][0] * np.array([3]))
print("y index:", max_indices[1][0] * np.array([3]))
print("z index:", max_indices[2][0] * np.array([3]))
Maximum x-axis velocity: 5.7993
Time index: [84]
y index: [1041]
z index: [543]
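An equivalent and slightly tidier way to recover the same location is np.argmax plus np.unravel_index; a sketch, assuming data has shape (100, ny, nx, 2) as built above:

u_component = data[..., 0]
t, y, x = np.unravel_index(np.argmax(u_component), u_component.shape)
print(u_component[t, y, x], t * 3, y * 3, x * 3)  # same max value and scaled indices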
UserWarning: frames=<zip object at 0x7e27dddb0a80> which we can infer the length of, did not pass an explicit *save_count* and passed cache_frame_data=True. To avoid a possibly unbounded cache, frame data caching has been disabled. To suppress this warning either pass `cache_frame_data=False` or `save_count=MAX_FRAMES`.
animation = anim.FuncAnimation(fig, frame, frames=state, blit=True)
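The warning's own suggestion applies directly; a sketch, assuming state yields one frame per time step of the 100-step dataset:

# Pass an explicit save_count (or cache_frame_data=False) so Matplotlib
# keeps frame caching enabled for the un-sized zip iterator.
animation = anim.FuncAnimation(fig, frame, frames=state, save_count=100, blit=True)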
# We'll calculate all four points we'll need for parts (a) and (b) here.
p4_scan_products = list(map(
    lambda p: list(scan_position(p, 0.001)),
    [[1400 // 3, 400 // 3],
     [400 // 3, 600 // 3],
     [500 // 3, 1400 // 3],
     [1200 // 3, 1200 // 3]],
))
/usr/local/lib/python3.10/dist-packages/sklearn/gaussian_process/_gpr.py:629: ConvergenceWarning: lbfgs failed to converge (status=2):
ABNORMAL_TERMINATION_IN_LNSRCH.
Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
_check_optimize_result("lbfgs", opt_res)
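The ConvergenceWarning comes from a Gaussian-process fit inside scan_position, whose body is not shown here. One remedy the warning itself points to is scaling the training data before fitting; a hedged sketch, where X_train and y_train are hypothetical placeholders for whatever scan_position actually fits:

from sklearn.preprocessing import StandardScaler
from sklearn.gaussian_process import GaussianProcessRegressor

# Standardizing inputs often lets L-BFGS converge when optimizing the kernel;
# X_train and y_train stand in for the notebook's real training arrays.
X_scaled = StandardScaler().fit_transform(X_train)
gpr = GaussianProcessRegressor(n_restarts_optimizer=5).fit(X_scaled, y_train)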
for i, pos_results in enumerate(p4_d_scan_products):
    for j, comp_results in enumerate(pos_results):
        plt.subplot(len(p4_d_scan_products), 2, 1 + 2 * i + j)
        plot_scan_results(comp_results, level_range=(0, 200))
plt.tight_layout()